%load_ext autoreload
%autoreload 2
%matplotlib inline
from keras import layers
from keras import models
from keras import optimizers
from keras.applications import VGG16
from keras.regularizers import l1, l2, l1_l2
import json
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
import pickle
import seaborn as sns
import pandas as pd
import glob
import matplotlib.image as mpimg
# Dataset root: four-class image folders (cat / dog / car / motorbike),
# pre-split into train / val / test subdirectories.
root = '/userhome/34/ljiang/deep_learning/A2/Datasets/cat_dog_car_bike/'
train_dir = os.path.join(root, 'train')
val_dir = os.path.join(root, 'val')
test_dir = os.path.join(root, 'test')
def plot_history(history):
    """Plot training/validation accuracy and loss curves from a Keras History.

    Expects ``history.history`` to contain the keys 'acc', 'val_acc',
    'loss' and 'val_loss' ('acc' matches the metric name compiled below;
    newer Keras versions record 'accuracy' instead -- adjust if loading
    histories produced elsewhere).
    """
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(acc) + 1)
    plt.figure(1, figsize=(10, 10))
    # Accuracy curves (top subplot).
    plt.subplot(211)
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    # Loss curves (bottom subplot).
    plt.subplot(212)
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
# Human-readable label for each predicted class index.
# NOTE(review): verify this ordering matches the generators' class_indices
# (flow_from_directory assigns indices alphabetically by folder name).
MAPPING = dict(enumerate(('cat', 'dog', 'car', 'motorbike')))
def save_history(history, index=None):
    """Pickle a Keras History object into the shared history directory.

    If *index* is None, the next free index is derived from the number of
    existing history pickles in the directory.

    NOTE(review): pickling the History object also pickles its attached
    model; pickling ``history.history`` (a plain dict) would be much
    lighter -- confirm what plot_pickle's callers expect before changing.
    """
    if index is None:
        index = len(glob.glob('/userhome/34/ljiang/deep_learning/A2/ass2/history/*.p'))
    with open('/userhome/34/ljiang/deep_learning/A2/ass2/history/history-%d.p' % index, 'wb') as f:
        pickle.dump(history, f)
def plot_pickle(index):
    """Load the pickled training history number *index* and plot its curves."""
    path = '/userhome/34/ljiang/deep_learning/A2/ass2/history/history-%d.p' % index
    with open(path, 'rb') as f:
        history = pickle.load(f)
    plot_history(history)
def plot_error(model, data_generator):
    """Display every image the model misclassifies, titled with its prediction.

    *data_generator* must be created with shuffle=False so that prediction
    order lines up with ``data_generator.classes`` and ``.filenames``
    (the test generators below are).
    """
    y_predict = model.predict_generator(data_generator)
    # Predicted class index per sample; compare against the true labels.
    predicted = np.argmax(y_predict, axis=1)
    for i in np.nonzero(predicted != data_generator.classes)[0]:
        file_name = data_generator.filenames[i]
        file_path = os.path.join(data_generator.directory, file_name)
        plt.imshow(mpimg.imread(file_path))
        plt.title('%s: %s\n(%s)' % (file_name, MAPPING[predicted[i]], list(y_predict[i])))
        plt.show()
def load_model(file_name, root='/userhome/34/ljiang/deep_learning/A2/ass2/'):
    """Load a saved Keras model named *file_name* from directory *root*."""
    return models.load_model(os.path.join(root, file_name))
# Training data: on-the-fly augmentation (rotation, shifts, shear, zoom,
# horizontal flip) on top of [0, 1] pixel rescaling.
aug_train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# 224x224 images, batches of 20, one-hot (categorical) labels.
aug_train_generator = aug_train_datagen.flow_from_directory(
train_dir,
target_size=(224, 224),
batch_size=20,
class_mode='categorical')
# Validation data: rescaling only, no augmentation.
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(224, 224),
batch_size=20,
class_mode='categorical')
# Test data: batch_size=1 and shuffle=False so predictions line up with
# test_generator.classes / .filenames (required by plot_error).
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(224, 224),
batch_size=1,
class_mode='categorical',
shuffle=False)
def best_model():
    """Build a VGG16-based 4-class classifier fine-tuned from block5_conv1 up.

    The ImageNet-pretrained convolutional base is frozen below
    'block5_conv1'; the head is Flatten -> Dropout(0.5) -> Dense(512, relu)
    -> Dense(4, softmax), compiled with RMSprop(1e-4).
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Freeze everything below block5_conv1; fine-tune block5 only.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model
# Experiment 1: 224x224 inputs, batch 20, Dropout-before-Dense head.
model_q5 = best_model()
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=val_generator,
    validation_steps=50)
# model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_1')
model_q5.summary()
model_q5.evaluate_generator(test_generator)
# Was plot_history(hs[0]['history']); 'hs' is never defined in this script,
# so plot the history produced just above instead.
plot_history(history_q5)
model = load_model('q5_model_1')
plot_error(model, test_generator)
# Rebuild the data pipeline with a larger training/validation batch size (32).
aug_train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
train_dir,
target_size=(224, 224),
batch_size=32,
class_mode='categorical')
# Validation data: rescaling only, no augmentation.
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(224, 224),
batch_size=32,
class_mode='categorical')
# Test data: batch_size=1, shuffle=False so plot_error can align predictions.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(224, 224),
batch_size=1,
class_mode='categorical',
shuffle=False)
def best_model():
    """Build a VGG16-based 4-class classifier fine-tuned from block5_conv1 up.

    Same architecture as experiment 1: frozen VGG16 base below
    'block5_conv1', head Flatten -> Dropout(0.5) -> Dense(512, relu)
    -> Dense(4, softmax), RMSprop(1e-4).
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Freeze everything below block5_conv1; fine-tune block5 only.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model
# Experiment 2: same architecture as experiment 1, batch size 32.
model_q5 = best_model()
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=val_generator,
    validation_steps=50)
# model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_2')
model_q5.summary()
model_q5.evaluate_generator(test_generator)
# Was plot_history(hs[1]['history']); 'hs' is never defined in this script,
# so plot the history produced just above instead.
plot_history(history_q5)
model = load_model('q5_model_2')
plot_error(model, test_generator)
# Data pipeline for experiment 3 (identical to experiment 2: 224x224, batch 32).
aug_train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
train_dir,
target_size=(224, 224),
batch_size=32,
class_mode='categorical')
# Validation data: rescaling only, no augmentation.
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(224, 224),
batch_size=32,
class_mode='categorical')
# Test data: batch_size=1, shuffle=False so plot_error can align predictions.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(224, 224),
batch_size=1,
class_mode='categorical',
shuffle=False)
def best_model():
    """Build a VGG16-based 4-class classifier fine-tuned from block5_conv1 up.

    Variant for experiment 3: no Dropout in the head --
    Flatten -> Dense(512, relu) -> Dense(4, softmax), RMSprop(1e-4).
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Freeze everything below block5_conv1; fine-tune block5 only.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model
# Experiment 3: head without Dropout; keep the (model, history) pair for
# later comparison.
model_q5 = best_model()
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=val_generator,
    validation_steps=50)
# 'hs' was never initialised anywhere in this script (flattened-notebook
# leftover); create it on first use so the append cannot raise NameError.
if 'hs' not in globals():
    hs = []
hs.append({
    'model': model_q5,
    'history': history_q5,
})
# model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_3')
model_q5.summary()
model_q5.evaluate_generator(test_generator)
plot_history(history_q5)
model = load_model('q5_model_3')
plot_error(model, test_generator)
# NOTE: Unfreezing more layers (starting from block4_conv1) gave much worse results!
# Data pipeline for experiment 4: larger 256x256 input resolution, batch 32.
aug_train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
train_dir,
target_size=(256, 256),
batch_size=32,
class_mode='categorical')
# Validation data: rescaling only, no augmentation.
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(256, 256),
batch_size=32,
class_mode='categorical')
# Test data: batch_size=1, shuffle=False so plot_error can align predictions.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(256, 256),
batch_size=1,
class_mode='categorical',
shuffle=False)
def best_model():
    """Build a VGG16-based 4-class classifier fine-tuned from block5_conv1 up.

    Variant for experiment 4: 256x256 inputs (matching the generators
    above); head Flatten -> Dropout(0.5) -> Dense(512, relu)
    -> Dense(4, softmax), RMSprop(1e-4).
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    # Freeze everything below block5_conv1; fine-tune block5 only.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model
# Experiment 4: 256x256 inputs with the Dropout head; train for 30 epochs,
# then save the model and its history under index 4.
model_q5 = best_model()
history_q5 = model_q5.fit_generator(
aug_train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=val_generator,
validation_steps=50)
model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_4')
save_history(history_q5, 4)
model_q5.summary()
# NOTE: the evaluation result is not captured; it only prints in a notebook.
model_q5.evaluate_generator(test_generator)
plot_history(history_q5)
# Data pipeline for experiment 5: back to 224x224 inputs, batch 32.
aug_train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
train_dir,
target_size=(224, 224),
batch_size=32,
class_mode='categorical')
# Validation data: rescaling only, no augmentation.
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(224, 224),
batch_size=32,
class_mode='categorical')
# Test data: batch_size=1, shuffle=False so plot_error can align predictions.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(224, 224),
batch_size=1,
class_mode='categorical',
shuffle=False)
def best_model():
    """Build a VGG16-based 4-class classifier fine-tuned from block5_conv1 up.

    Variant for experiment 5: head Flatten -> Dense(512, relu)
    -> Dropout(0.5) -> BatchNormalization -> Dense(4, softmax),
    RMSprop(1e-4).
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Freeze everything below block5_conv1; fine-tune block5 only.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(4, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model
# Experiment 5: Dropout + BatchNormalization head; save as model/history 5
# and inspect the misclassified test images.
model_q5 = best_model()
history_q5 = model_q5.fit_generator(
aug_train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=val_generator,
validation_steps=50)
model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_5')
save_history(history_q5, 5)
model_q5.summary()
model_q5.evaluate_generator(test_generator)
plot_history(history_q5)
plot_error(load_model('q5_model_5'), test_generator)
# Data pipeline for experiment 6 (identical to experiment 5: 224x224, batch 32).
aug_train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
train_dir,
target_size=(224, 224),
batch_size=32,
class_mode='categorical')
# Validation data: rescaling only, no augmentation.
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(224, 224),
batch_size=32,
class_mode='categorical')
# Test data: batch_size=1, shuffle=False so plot_error can align predictions.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(224, 224),
batch_size=1,
class_mode='categorical',
shuffle=False)
def best_model():
    """Build a VGG16-based 4-class classifier fine-tuned from block5_conv1 up.

    Variant for experiment 6: BatchNormalization head without Dropout --
    Flatten -> Dense(512, relu) -> BatchNormalization -> Dense(4, softmax),
    RMSprop(1e-4).
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Freeze everything below block5_conv1; fine-tune block5 only.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    # model.add(layers.Dropout(0.5))  # deliberately disabled for this run
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(4, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model
# Experiment 6: BatchNormalization-only head; save as model/history 6
# and inspect the misclassified test images.
model_q5 = best_model()
history_q5 = model_q5.fit_generator(
aug_train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=val_generator,
validation_steps=50)
model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_6')
save_history(history_q5, 6)
model_q5.summary()
model_q5.evaluate_generator(test_generator)
plot_history(history_q5)
model = load_model('q5_model_6')
plot_error(model, test_generator)
# Data pipeline for experiment 7: adds vertical flips to the augmentation.
aug_train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
train_dir,
target_size=(224, 224),
batch_size=32,
class_mode='categorical')
# Validation data: rescaling only, no augmentation.
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(224, 224),
batch_size=32,
class_mode='categorical')
# Test data: batch_size=1, shuffle=False so plot_error can align predictions.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(224, 224),
batch_size=1,
class_mode='categorical',
shuffle=False)
def best_model():
    """Build a VGG16-based 4-class classifier fine-tuned from block5_conv1 up.

    Variant for experiment 7: head Flatten -> Dense(512, relu)
    -> Dropout(0.5) -> BatchNormalization -> Dense(4, softmax),
    RMSprop(1e-4).
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Freeze everything below block5_conv1; fine-tune block5 only.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(4, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model
# Experiment 7: vertical-flip augmentation, Dropout + BN head; only 20 epochs.
model_q5 = best_model()
history_q5 = model_q5.fit_generator(
aug_train_generator,
steps_per_epoch=100,
epochs=20,
validation_data=val_generator,
validation_steps=50)
model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_7')
save_history(history_q5, 7)
model_q5.summary()
model_q5.evaluate_generator(test_generator)
plot_history(history_q5)
# model = load_model('q5_model_3')
# Inspect the freshly trained model's misclassifications directly.
plot_error(model_q5, test_generator)
# Data pipeline for experiment 8: 256x256 inputs (horizontal flips only).
aug_train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
train_dir,
target_size=(256, 256),
batch_size=32,
class_mode='categorical')
# Validation data: rescaling only, no augmentation.
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(256, 256),
batch_size=32,
class_mode='categorical')
# Test data: batch_size=1, shuffle=False so plot_error can align predictions.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(256, 256),
batch_size=1,
class_mode='categorical',
shuffle=False)
def best_model():
    """Build a VGG16-based 4-class classifier fine-tuned from block5_conv1 up.

    Variant for experiment 8: 256x256 inputs (matching the generators
    above); head Flatten -> Dense(512, relu) -> Dropout(0.5)
    -> BatchNormalization -> Dense(4, softmax), RMSprop(1e-4).
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    # Freeze everything below block5_conv1; fine-tune block5 only.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(4, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model
# Experiment 8: 256x256 inputs, Dropout + BN head, 20 epochs; save as
# model/history 8 and inspect the misclassified test images.
model_q5 = best_model()
history_q5 = model_q5.fit_generator(
aug_train_generator,
steps_per_epoch=100,
epochs=20,
validation_data=val_generator,
validation_steps=50)
model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_8')
save_history(history_q5, 8)
model_q5.summary()
model_q5.evaluate_generator(test_generator)
plot_history(history_q5)
# model = load_model('q5_model_3')
# Inspect the freshly trained model's misclassifications directly.
plot_error(model_q5, test_generator)